getwd()
## [1] "/Users/alexg/R files/hair_cortisol/skew-normal FINAL"
library(readxl)
library(psych)
library(dlookr)
## Registered S3 methods overwritten by 'dlookr':
## method from
## plot.transform scales
## print.transform scales
##
## Attaching package: 'dlookr'
## The following object is masked from 'package:psych':
##
## describe
## The following object is masked from 'package:base':
##
## transform
library(vtable)
## Loading required package: kableExtra
library(dplyr)
##
## Attaching package: 'dplyr'
## The following object is masked from 'package:kableExtra':
##
## group_rows
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
library(reshape)
##
## Attaching package: 'reshape'
## The following object is masked from 'package:dplyr':
##
## rename
library(ggplot2)
##
## Attaching package: 'ggplot2'
## The following objects are masked from 'package:psych':
##
## %+%, alpha
library(brms)
## Loading required package: Rcpp
## Loading 'brms' package (version 2.22.0). Useful instructions
## can be found by typing help('brms'). A more detailed introduction
## to the package is available through vignette('brms_overview').
##
## Attaching package: 'brms'
## The following object is masked from 'package:psych':
##
## cs
## The following object is masked from 'package:stats':
##
## ar
library(rethinking)
## Loading required package: cmdstanr
## This is cmdstanr version 0.8.0
## - CmdStanR documentation and vignettes: mc-stan.org/cmdstanr
## - CmdStan path: /Users/alexg/.cmdstan/cmdstan-2.36.0
## - CmdStan version: 2.36.0
## Loading required package: posterior
## This is posterior version 1.6.1
##
## Attaching package: 'posterior'
## The following object is masked from 'package:dlookr':
##
## entropy
## The following objects are masked from 'package:stats':
##
## mad, sd, var
## The following objects are masked from 'package:base':
##
## %in%, match
## Loading required package: parallel
## rethinking (Version 2.42)
##
## Attaching package: 'rethinking'
## The following objects are masked from 'package:brms':
##
## LOO, stancode, WAIC
## The following objects are masked from 'package:psych':
##
## logistic, logit, sim
## The following object is masked from 'package:stats':
##
## rstudent
library(loo)
## This is loo version 2.8.0
## - Online documentation and vignettes at mc-stan.org/loo
## - As of v2.0.0 loo defaults to 1 core but we recommend using as many as possible. Use the 'cores' argument or set options(mc.cores = NUM_CORES) for an entire session.
##
## Attaching package: 'loo'
## The following object is masked from 'package:rethinking':
##
## compare
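As the loo start-up message suggests, the core count can be set once per session rather than per call; a minimal sketch (parallel is already loaded as a dependency of rethinking):

# Use all available cores for loo (and Stan sampling) in this session
options(mc.cores = parallel::detectCores())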
library(priorsense)
library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ forcats 1.0.0 ✔ stringr 1.5.1
## ✔ lubridate 1.9.4 ✔ tibble 3.2.1
## ✔ purrr 1.0.4 ✔ tidyr 1.3.1
## ✔ readr 2.1.5
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ ggplot2::%+%() masks psych::%+%()
## ✖ ggplot2::alpha() masks psych::alpha()
## ✖ tidyr::expand() masks reshape::expand()
## ✖ tidyr::extract() masks dlookr::extract()
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::group_rows() masks kableExtra::group_rows()
## ✖ dplyr::lag() masks stats::lag()
## ✖ purrr::map() masks rethinking::map()
## ✖ reshape::rename() masks dplyr::rename()
## ✖ lubridate::stamp() masks reshape::stamp()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(vioplot)
## Loading required package: sm
## Package 'sm', version 2.2-6.0: type help(sm) for summary information
##
## Attaching package: 'sm'
##
## The following object is masked from 'package:dlookr':
##
## binning
##
## Loading required package: zoo
##
## Attaching package: 'zoo'
##
## The following objects are masked from 'package:base':
##
## as.Date, as.Date.numeric
library(bayesplot)
## This is bayesplot version 1.12.0
## - Online documentation and vignettes at mc-stan.org/bayesplot
## - bayesplot theme set to bayesplot::theme_default()
## * Does _not_ affect other ggplot2 plots
## * See ?bayesplot_theme_set for details on theme setting
##
## Attaching package: 'bayesplot'
##
## The following object is masked from 'package:posterior':
##
## rhat
##
## The following object is masked from 'package:brms':
##
## rhat
library(bayestestR)
df <- read_xlsx("hair_cort_dog_all.xlsx", col_types = c("text", "text",
"text", "text", "text", "text",
"text", "numeric","text", "skip",
"numeric", "skip", "skip",
"numeric", "skip"))
df <- as.data.frame(df)
dim(df) # will tell you how many rows and columns the dataset has
## [1] 73 11
class(df) # tells you what data structure the dataset has been assigned
## [1] "data.frame"
head(df)
## number group visit season breed_group coat_colour sex age comorbidity
## 1 c1 stopped v0 winter ret dark Male 43 yes
## 2 c2 stopped v0 autumn mix dark Male 105 yes
## 3 c3 stopped v0 spring ckcs mix Female 117 yes
## 4 c4 stopped v0 summer ret dark Female 108 yes
## 5 c5 stopped v0 summer ret dark Female 110 yes
## 6 c6 stopped v0 winter mix light Female 120 yes
## fat_percent cortisol
## 1 52.21393 4.924220
## 2 38.52059 7.304202
## 3 46.94916 1.590000
## 4 44.46813 0.861570
## 5 39.59363 6.217317
## 6 NA 4.426785
numeric_df <- Filter(is.numeric, df)
describe(numeric_df) # dlookr's describe (which masks psych::describe) provides summary stats
## # A tibble: 3 × 26
## described_variables n na mean sd se_mean IQR skewness kurtosis
## <chr> <int> <int> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 age 73 0 95.8 35.6 4.16 44 -0.104 -0.00589
## 2 fat_percent 55 18 40.5 7.82 1.05 7.82 -0.294 1.12
## 3 cortisol 73 0 8.11 16.5 1.93 5.43 4.05 18.7
## # ℹ 17 more variables: p00 <dbl>, p01 <dbl>, p05 <dbl>, p10 <dbl>, p20 <dbl>,
## # p25 <dbl>, p30 <dbl>, p40 <dbl>, p50 <dbl>, p60 <dbl>, p70 <dbl>,
## # p75 <dbl>, p80 <dbl>, p90 <dbl>, p95 <dbl>, p99 <dbl>, p100 <dbl>
plot_normality(numeric_df)
apply(numeric_df, 2, shapiro.test)
## $age
##
## Shapiro-Wilk normality test
##
## data: newX[, i]
## W = 0.97361, p-value = 0.1288
##
##
## $fat_percent
##
## Shapiro-Wilk normality test
##
## data: newX[, i]
## W = 0.97956, p-value = 0.4692
##
##
## $cortisol
##
## Shapiro-Wilk normality test
##
## data: newX[, i]
## W = 0.46269, p-value = 6.756e-15
qqnorm(df$cortisol)
qqline(df$cortisol, col = "red")
qqnorm(log(df$cortisol))
qqline(log(df$cortisol), col = "red")
shapiro.test(log(df$cortisol))
##
## Shapiro-Wilk normality test
##
## data: log(df$cortisol)
## W = 0.94725, p-value = 0.004126
summary(df$cortisol)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.4141 1.4119 2.3331 8.1089 6.8455 104.6172
summary(log(df$cortisol))
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -0.8817 0.3449 0.8472 1.1816 1.9236 4.6503
df$lgCort <- log(df$cortisol)
summary(df$lgCort)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -0.8817 0.3449 0.8472 1.1816 1.9236 4.6503
hist(df$lgCort)
df$breed <- df$breed_group
df$breed <- factor(df$breed, levels = c("mix", "ckcs", "pug", "ret", "other"))
head(df$breed)
## [1] ret mix ckcs ret ret mix
## Levels: mix ckcs pug ret other
sumtable(df)
| Variable | N | Mean | Std. Dev. | Min | Pctl. 25 | Pctl. 75 | Max |
|---|---|---|---|---|---|---|---|
| group | 73 |  |  |  |  |  |  |
| … completed | 42 | 58% |  |  |  |  |  |
| … stopped | 31 | 42% |  |  |  |  |  |
| visit | 73 |  |  |  |  |  |  |
| … v0 | 52 | 71% |  |  |  |  |  |
| … v1 | 21 | 29% |  |  |  |  |  |
| season | 73 |  |  |  |  |  |  |
| … autumn | 21 | 29% |  |  |  |  |  |
| … spring | 14 | 19% |  |  |  |  |  |
| … summer | 22 | 30% |  |  |  |  |  |
| … winter | 16 | 22% |  |  |  |  |  |
| breed_group | 73 |  |  |  |  |  |  |
| … ckcs | 7 | 10% |  |  |  |  |  |
| … mix | 16 | 22% |  |  |  |  |  |
| … other | 26 | 36% |  |  |  |  |  |
| … pug | 7 | 10% |  |  |  |  |  |
| … ret | 17 | 23% |  |  |  |  |  |
| coat_colour | 73 |  |  |  |  |  |  |
| … dark | 30 | 41% |  |  |  |  |  |
| … light | 27 | 37% |  |  |  |  |  |
| … mix | 16 | 22% |  |  |  |  |  |
| sex | 73 |  |  |  |  |  |  |
| … Female | 43 | 59% |  |  |  |  |  |
| … Male | 30 | 41% |  |  |  |  |  |
| age | 73 | 96 | 36 | 16 | 73 | 117 | 182 |
| comorbidity | 73 |  |  |  |  |  |  |
| … no | 15 | 21% |  |  |  |  |  |
| … yes | 58 | 79% |  |  |  |  |  |
| fat_percent | 55 | 40 | 7.8 | 18 | 37 | 45 | 61 |
| cortisol | 73 | 8.1 | 16 | 0.41 | 1.4 | 6.8 | 105 |
| lgCort | 73 | 1.2 | 1.2 | -0.88 | 0.34 | 1.9 | 4.7 |
| breed | 73 |  |  |  |  |  |  |
| … mix | 16 | 22% |  |  |  |  |  |
| … ckcs | 7 | 10% |  |  |  |  |  |
| … pug | 7 | 10% |  |  |  |  |  |
| … ret | 17 | 23% |  |  |  |  |  |
| … other | 26 | 36% |  |  |  |  |  |
par(mfrow = c(1,1))
vioplot(lgCort ~ breed, col = "firebrick",
data = df)
stripchart(lgCort ~ breed, vertical = TRUE, method = "jitter",
col = "steelblue3", data = df, pch = 20)
df$slgCort <- standardize(df$lgCort) # standardize() from rethinking: (x - mean) / sd
summary(df$slgCort)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -1.7079 -0.6925 -0.2768 0.0000 0.6142 2.8713
hist(df$slgCort)
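For reference, standardize() just centres and scales; a minimal base-R check (a sketch, not part of the original analysis):

# Base-R equivalent: subtract the mean, divide by the SD
slgCort_check <- as.numeric(scale(df$lgCort))
all.equal(slgCort_check, df$slgCort) # should be TRUE up to floating point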
df2 <- na.omit(df) # drops the 18 rows with missing fat_percent
model <- brm(slgCort ~ breed + (1 | visit), family = skew_normal(), data = df2) # initial fit with brms default priors; refitted below with explicit priors
default_prior(slgCort ~ breed + (1 | visit),
family = skew_normal(),
data = df2)
## prior                   class     coef       group resp dpar nlpar lb ub source
## normal(0, 4)            alpha                                             default
## (flat)                  b                                                 default
## (flat)                  b         breedckcs                               (vectorized)
## (flat)                  b         breedother                              (vectorized)
## (flat)                  b         breedpug                                (vectorized)
## (flat)                  b         breedret                                (vectorized)
## student_t(3, -0.1, 2.5) Intercept                                         default
## student_t(3, 0, 2.5)    sd                                         0      default
## student_t(3, 0, 2.5)    sd                   visit                 0      (vectorized)
## student_t(3, 0, 2.5)    sd        Intercept  visit                 0      (vectorized)
## student_t(3, 0, 2.5)    sigma                                      0      default
There are no published data on breed effects, but an effect is plausible. However, it is unclear which breeds would differ and in which direction. Therefore, use a regularising prior but keep it neutral and broad.
# Set individual priors
prior_int <- set_prior("normal(0, 0.5)", class = "Intercept")
prior_sig <- set_prior("exponential(1)", class = "sigma")
prior_b <- set_prior("normal(0, 1)", class = "b")
prior_sd <- set_prior("normal(0, 1)", class = "sd")
prior_alpha <- set_prior("normal(4, 8)", class = "alpha")
# Combine priors into list
priors <- c(prior_int, prior_sig, prior_b, prior_sd, prior_alpha)
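Before a long run, the custom priors can be checked against the model's parameter classes; a quick sketch using brms's validate_prior():

# Confirm each custom prior maps onto a parameter class in the model
validate_prior(priors, slgCort ~ breed + (1 | visit),
               data = df2, family = skew_normal())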
x <- seq(-3, 3, length.out = 100)
y <- dnorm(x, mean = 0, sd = 1)
plot(y ~ x, type = "l")
x <- seq(0, 3, length.out = 100)
y <- dexp(x, rate = 0.5) # NB plotted at rate 0.5; the sigma prior above uses exponential(1)
plot(y ~ x, type = "l")
Based on the distribution of log-transformed hair cortisol, expect the response to be skewed to the right. Try different levels of alpha for the skew normal.
x <- seq(-3, 5, length.out = 100)
y <- dskew_normal(x, mu = 0, sigma = 1, alpha = 4)
plot(y ~ x, type = "l")
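To compare candidate skews directly, several alpha values can be overlaid on one plot; a sketch using brms::dskew_normal (the alpha values here are illustrative):

# Overlay skew-normal densities for a few candidate alpha values
x <- seq(-3, 5, length.out = 200)
plot(x, dskew_normal(x, mu = 0, sigma = 1, alpha = 1), type = "l",
     ylab = "density", ylim = c(0, 0.8))
lines(x, dskew_normal(x, mu = 0, sigma = 1, alpha = 4), col = "firebrick")
lines(x, dskew_normal(x, mu = 0, sigma = 1, alpha = 10), col = "steelblue3")
legend("topright", legend = c("alpha = 1", "alpha = 4", "alpha = 10"),
       col = c("black", "firebrick", "steelblue3"), lty = 1)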
So, expect alpha to be positive, perhaps around 4, but keep the prior SD broad to allow some flexibility.
x <- seq(0, 5, length.out = 100)
y <- dnorm(x, mean = 0, sd = 2)
plot(y ~ x, type = "l")
x <- seq(-3, 3, length.out = 100)
y <- dnorm(x, mean = 0, sd = 1.0)
plot(y ~ x, type = "l")
Increased adapt_delta above the default of 0.8 (set very close to 1 here), as earlier runs had divergent transitions.
set.seed(666)
model <- brm(slgCort ~ breed + (1 | visit),
family = skew_normal(),
data = df,
prior = priors,
control=list(adapt_delta=0.99999999, stepsize = 0.001, max_treedepth = 15),
iter = 8000, warmup = 2000,
cores = 4,
save_pars = save_pars(all =TRUE),
sample_prior = TRUE)
## Compiling Stan program...
## Start sampling
## Found more than one class "stanfit" in cache; using the first, from namespace 'rethinking'
## Also defined by 'rstan'
## Warning: There were 1 divergent transitions after warmup. See
## https://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## to find out why this is a problem and how to eliminate them.
## Warning: Examine the pairs() plot to diagnose sampling problems
summary(model)
## Warning: There were 1 divergent transitions after warmup. Increasing
## adapt_delta above 0.99999999 may help. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## Family: skew_normal
## Links: mu = identity; sigma = identity; alpha = identity
## Formula: slgCort ~ breed + (1 | visit)
## Data: df (Number of observations: 73)
## Draws: 4 chains, each with iter = 8000; warmup = 2000; thin = 1;
## total post-warmup draws = 24000
##
## Multilevel Hyperparameters:
## ~visit (Number of levels: 2)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.36 0.36 0.01 1.33 1.00 7461 8418
##
## Regression Coefficients:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -0.10 0.30 -0.73 0.51 1.00 10036 11709
## breedckcs 0.15 0.38 -0.63 0.83 1.00 13560 14403
## breedpug 0.19 0.36 -0.56 0.86 1.00 15367 14174
## breedret 0.09 0.28 -0.48 0.62 1.00 12021 15125
## breedother 0.11 0.26 -0.39 0.62 1.00 14971 15359
##
## Further Distributional Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 1.04 0.10 0.87 1.26 1.00 15032 15354
## alpha 5.43 3.48 1.59 15.07 1.00 9626 6806
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
plot(model)
Looking for "hairy caterpillars": well-mixed, stationary chains with no drift.
mcmc_plot(model, type = 'rank_overlay')
HPDIs are usually better than the compatibility intervals given in the summary.

### a. ckcs
draws <- as.matrix(model)
HPDI(draws[,2], 0.97) # 2nd column is draws for ckcs
## |0.97 0.97|
## -0.6869632 0.9491342
draws <- as.matrix(model)
HPDI(draws[,3], 0.97) # 3rd column is draws for pug
## |0.97 0.97|
## -0.6234917 0.9448040
draws <- as.matrix(model)
HPDI(draws[,4], 0.97) # 4th column is draws for ret
## |0.97 0.97|
## -0.5299995 0.6851662
draws <- as.matrix(model)
HPDI(draws[,5], 0.97) # 5th column is draws for other
## |0.97 0.97|
## -0.4500417 0.6752678
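Indexing the draws matrix by column position is fragile if the model changes; the same intervals can be pulled by parameter name (a sketch using the coefficient names shown in the summary):

# Select coefficients by name rather than by column position
draws <- as.matrix(model)
HPDI(draws[, "b_breedckcs"], 0.97)
HPDI(draws[, "b_breedpug"], 0.97)
HPDI(draws[, "b_breedret"], 0.97)
HPDI(draws[, "b_breedother"], 0.97)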
bayes_R2(model, probs = c(0.015, 0.5, 0.985)) # 0.015, 0.5, 0.985 are the quantiles
## Estimate Est.Error Q1.5 Q50 Q98.5
## R2 0.04655765 0.02849586 0.00638642 0.04106699 0.1281884
loo_R2(model, probs = c(0.015, 0.5, 0.985)) # 0.015, 0.5, 0.985 are the quantiles
## Estimate Est.Error Q1.5 Q50 Q98.5
## R2 -0.08794446 0.03236296 -0.1815256 -0.08316866 -0.03393011
Posterior predictive checks assess whether the observed data look similar to data simulated from the fitted model.
pp_check(model, ndraws = 100)
par(mfrow = c(1,1))
pp_check(model, type = "hist", ndraws = 11, binwidth = 0.25) # separate histograms of 11 MCMC draws vs actual data
pp_check(model, type = "error_hist", ndraws = 11) # separate histograms of errors for 11 draws
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
pp_check(model, type = "scatter_avg", ndraws = 100) # scatter plot
pp_check(model, type = "stat_2d") # scatterplot of joint posteriors
## Using all posterior draws for ppc type 'stat_2d' by default.
## Note: in most cases the default test statistic 'mean' is too weak to detect anything of interest.
# PPC functions for predictive checks based on (approximate) leave-one-out (LOO) cross-validation
pp_check(model, type = "loo_pit_overlay", ndraws = 1000)
## Warning: Found 1 observations with a pareto_k > 0.7 in model '.x1'. We
## recommend to set 'moment_match = TRUE' in order to perform moment matching for
## problematic observations.
## NOTE: The kernel density estimate assumes continuous observations and is not optimal for discrete observations.
pp_check(model, type = "error_scatter_avg")
## Using all posterior draws for ppc type 'error_scatter_avg' by default.
pairs(model)
loo_model <- loo(model, moment_match = TRUE)
loo_model
##
## Computed from 24000 by 73 log-likelihood matrix.
##
## Estimate SE
## elpd_loo -105.0 6.1
## p_loo 6.7 1.5
## looic 210.0 12.2
## ------
## MCSE of elpd_loo is 0.1.
## MCSE and ESS estimates assume MCMC draws (r_eff in [0.4, 1.2]).
##
## All Pareto k estimates are good (k < 0.7).
## See help('pareto-k-diagnostic') for details.
First, check the sensitivity of the posterior to power-scaling of the prior and the likelihood. Then plot the posterior alongside the posteriors resulting from power-scaling.
powerscale_sensitivity(model, variable = c("b_Intercept", "b_breedckcs", "b_breedother", "b_breedpug", "b_breedret"))
## Sensitivity based on cjs_dist
## Prior selection: all priors
## Likelihood selection: all data
##
## variable prior likelihood diagnosis
## b_Intercept 0.036 0.037 -
## b_breedckcs 0.029 0.086 -
## b_breedother 0.020 0.077 -
## b_breedpug 0.022 0.094 -
## b_breedret 0.030 0.079 -
powerscale_plot_dens(model, variable = c("b_Intercept", "b_breedckcs", "b_breedother", "b_breedpug", "b_breedret"), facet_rows = "variable")
check_prior(model, effects = "all")
## Parameter Prior_Quality
## 1 b_Intercept informative
## 2 b_breedckcs informative
## 3 b_breedpug informative
## 4 b_breedret informative
## 5 b_breedother informative
## 6 sd_visit__Intercept informative
Data can also be simulated from the priors alone: fit the model but sample only from the prior. If the resulting predictions look plausible, it helps confirm that the priors were reasonable.
set.seed(666)
model_priors_only <- brm(slgCort ~ breed + (1 | visit),
family = skew_normal(),
prior = priors,
data = df,
sample_prior = "only")
## Compiling Stan program...
## Start sampling
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 5.6e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.56 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 1: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 1: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 1: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 1: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.02 seconds (Warm-up)
## Chain 1: 0.014 seconds (Sampling)
## Chain 1: 0.034 seconds (Total)
## Chain 1:
pp_check(model_priors_only, ndraws = 100) +
xlim(c(-10, 10))
## Warning: Removed 10 rows containing non-finite outside the scale range
## (`stat_density()`).
as_draws_df(model) %>%
select(b_Intercept:sigma) %>%
cov() %>%
round(digits = 3)
## Warning: Dropping 'draws_df' class as required metadata was removed.
## b_Intercept b_breedckcs b_breedpug b_breedret b_breedother
## b_Intercept 0.093 -0.038 -0.036 -0.040 -0.037
## b_breedckcs -0.038 0.141 0.035 0.042 0.033
## b_breedpug -0.036 0.035 0.127 0.035 0.034
## b_breedret -0.040 0.042 0.035 0.080 0.034
## b_breedother -0.037 0.033 0.034 0.034 0.066
## sd_visit__Intercept 0.002 -0.006 -0.004 -0.003 -0.001
## sigma 0.005 0.002 -0.001 0.002 -0.001
## sd_visit__Intercept sigma
## b_Intercept 0.002 0.005
## b_breedckcs -0.006 0.002
## b_breedpug -0.004 -0.001
## b_breedret -0.003 0.002
## b_breedother -0.001 -0.001
## sd_visit__Intercept 0.129 0.000
## sigma 0.000 0.010
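The covariances are easier to read as correlations; cov2cor() from base R converts the same matrix (a small follow-up sketch):

# Same summary as a correlation matrix
as_draws_df(model) %>%
  select(b_Intercept:sigma) %>%
  cov() %>%
  cov2cor() %>%
  round(digits = 2)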
NB: this uses posterior_predict(), so the simulations include residual (observation-level) noise.
# use posterior predict to simulate predictions
ppd <- posterior_predict(model)
par(mfrow = c(2,2))
stripchart(slgCort ~ breed, vertical = TRUE, method = "jitter",
col = "steelblue3", data = df, pch = 20, main = "Observed")
stripchart(ppd[sample(seq(1, dim(ppd)[1]), 1),] ~ breed, vertical = TRUE, method = "jitter",
col = "firebrick3", data = df, pch = 20, main = "PPD")
stripchart(ppd[sample(seq(1, dim(ppd)[1]), 1),] ~ breed, vertical = TRUE, method = "jitter",
col = "firebrick3", data = df, pch = 20, main = "PPD")
stripchart(ppd[sample(seq(1, dim(ppd)[1]), 1),] ~ breed, vertical = TRUE, method = "jitter",
col = "firebrick3", data = df, pch = 20, main = "PPD")
plot(conditional_effects(model), ask = FALSE)
ce <- conditional_effects(model, effects = "breed", prob = 0.97)
ce_df <- ce[[1]][c(1, 6:9)]
ggplot(ce_df, aes(x=breed, y=estimate__, group=1)) +
geom_errorbar(width=.1, aes(ymin=lower__, ymax=upper__), colour=c("#F8766D", "#A3A500","#00BF7D",
"#00B0F6", "#E776F3"), linewidth = 1) +
geom_point(shape=21, size=6, fill=c("#F8766D", "#A3A500","#00BF7D",
"#00B0F6", "#E776F3")) +
theme_bw() +
labs(title = "Conditional effect of breed on hair cortisol") +
labs(y = paste0("Log Hair Cortisol (standardised)")) +
labs(x = paste0("breed")) +
theme(axis.title.y = element_text(size=12, face="bold"),
axis.title.x = element_text(size=12, face="bold"),
title = element_text(size=12, face="bold"),
plot.title = element_text(hjust = 0.5),
axis.text.x = element_text(color = "grey25", size = 12),
axis.text.y = element_text(color = "grey50", size = 10))
mcmc_plot(model,
variable = c(
"b_breedckcs",
"b_breedpug",
"b_breedret",
"b_breedother"))
mcmc_plot(model, variable = c(
"b_Intercept",
"sigma",
"b_breedckcs",
"b_breedpug",
"b_breedret",
"b_breedother"))
posterior <- as.matrix(model)
mcmc_areas(posterior,
pars = c("b_Intercept", "sigma",
"b_breedckcs",
"b_breedpug",
"b_breedret",
"b_breedother"),
# arbitrary threshold for shading probability mass
prob = 0.75)
posterior <- as.matrix(model)
mcmc_areas(posterior,
pars = c("b_breedckcs",
"b_breedpug",
"b_breedret",
"b_breedother",
"prior_b"),
prob = 0.75) + # arbitrary threshold for shading probability mass
theme_classic() +
labs(title = "Prior vs posterior distribution for breed") +
labs(y = "") +
labs(x = paste0("Possible parameter values")) +
scale_y_discrete(labels=c("prior_b" = "Breed prior",
"b_breedother" = "Other posterior",
"b_breedret" = "Retriever posterior",
"b_breedpug" = "Pug posterior",
"b_breedckcs" = "CKCS posterior"),
limits = c("prior_b", "b_breedother",
"b_breedret", "b_breedpug",
"b_breedckcs")) +
theme(axis.title.y = element_text(size=12, face="bold"),
axis.title.x = element_text(size=12, face="bold"),
title = element_text(size=12, face="bold"),
plot.title = element_text(hjust = 0.5),
axis.text.x = element_text(color = "grey50", size = 12),
axis.text.y = element_text(color = "grey8",size = 12))
## Scale for y is already present.
## Adding another scale for y, which will replace the existing scale.
# Focus on describing posterior
hdi_range <- hdi(model, ci = c(0.65, 0.70, 0.80, 0.89, 0.97))
plot(hdi_range)
draws <- as.matrix(model)
mean(draws[,2] >0)
## [1] 0.6662917
mean(draws[,2] <0)
## [1] 0.3337083
HPDI(draws[,2], prob=0.97)
## |0.97 0.97|
## -0.6869632 0.9491342
draws <- as.matrix(model)
mean(draws[,3] >0)
## [1] 0.7137083
mean(draws[,3] <0)
## [1] 0.2862917
HPDI(draws[,3], prob=0.97)
## |0.97 0.97|
## -0.6234917 0.9448040
draws <- as.matrix(model)
mean(draws[, 4] >0)
## [1] 0.633625
mean(draws[, 4] <0)
## [1] 0.366375
HPDI(draws[, 4], prob=0.97)
## |0.97 0.97|
## -0.5299995 0.6851662
draws <- as.matrix(model)
mean(draws[, 5] >0)
## [1] 0.6578333
mean(draws[, 5] <0)
## [1] 0.3421667
HPDI(draws[, 5], prob=0.97)
## |0.97 0.97|
## -0.4500417 0.6752678
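bayestestR computes these one-sided posterior probabilities for all parameters in one call via the probability of direction (pd is the larger of P(b > 0) and P(b < 0)); a sketch:

# Probability of direction for every parameter at once
p_direction(model)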
# create new dataframe which contains results of the first dog
new_data <- rbind(df[1,], df[1,], df[1,], df[1,], df[1,])
# Now change one category to be different
new_data$breed <- c("mix", "ckcs", "pug", "ret", "other")
# Visualise df to make sure it has worked
new_data
## number group visit season breed_group coat_colour sex age comorbidity
## 1 c1 stopped v0 winter ret dark Male 43 yes
## 2 c1 stopped v0 winter ret dark Male 43 yes
## 3 c1 stopped v0 winter ret dark Male 43 yes
## 4 c1 stopped v0 winter ret dark Male 43 yes
## 5 c1 stopped v0 winter ret dark Male 43 yes
## fat_percent cortisol lgCort breed slgCort
## 1 52.21393 4.92422 1.594166 mix 0.3415375
## 2 52.21393 4.92422 1.594166 ckcs 0.3415375
## 3 52.21393 4.92422 1.594166 pug 0.3415375
## 4 52.21393 4.92422 1.594166 ret 0.3415375
## 5 52.21393 4.92422 1.594166 other 0.3415375
# Simulate posterior predictions for the new data (includes residual noise)
pred_means <- posterior_predict(model, newdata = new_data)
# Compare differences in predictions for each breed versus mix
differenceCKCS <- pred_means[,1] - pred_means[,2]
differencePug <- pred_means[,1] - pred_means[,3]
differenceRet <- pred_means[,1] - pred_means[,4]
differenceOther <- pred_means[,1] - pred_means[,5]
par(mfrow = c(2,2))
# Examine mean of difference
mean(differenceCKCS)
## [1] -0.1366653
# View histogram of this
hist(differenceCKCS)
# Create HPDI
HPDI(differenceCKCS, 0.93)
## |0.93 0.93|
## -2.918091 2.707034
# Examine mean of difference
mean(differencePug)
## [1] -0.1776636
# View histogram of this
hist(differencePug)
# Create HPDI
HPDI(differencePug, 0.93)
## |0.93 0.93|
## -3.013737 2.597078
# Examine mean of difference
mean(differenceRet)
## [1] -0.08584516
# View histogram of this
hist(differenceRet)
# Create HPDI
HPDI(differenceRet, 0.93)
## |0.93 0.93|
## -2.995711 2.595451
# Examine mean of difference
mean(differenceOther)
## [1] -0.09125235
# View histogram of this
hist(differenceOther)
# Create HPDI
HPDI(differenceOther, 0.93)
## |0.93 0.93|
## -2.81857 2.68885
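These intervals are wide because posterior_predict() simulates individual observations. To contrast expected means instead, the same comparison can be run with posterior_epred(), which drops the residual noise; a sketch for the CKCS contrast:

# Expected-mean predictions exclude observation noise, giving narrower contrasts
epred <- posterior_epred(model, newdata = new_data)
differenceCKCS_mu <- epred[, 1] - epred[, 2]
mean(differenceCKCS_mu)
HPDI(differenceCKCS_mu, 0.93)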
# create new dataframe which contains results of all dogs
new_data1 <- df
# Now set every dog's breed to mix ('breed' is the model term, not 'breed_group')
new_data1$breed <- "mix"
# create new dataframe which contains results of all dogs
new_data2 <- df
# Now set every dog's breed to ckcs
new_data2$breed <- "ckcs"
# Now get predictions from the draws of the models
pred_nd1 <- posterior_predict(model, newdata = new_data1)
pred_nd2 <- posterior_predict(model, newdata = new_data2)
pred_diff <- pred_nd1 - pred_nd2
pred_diff <- data.frame(pred_diff)
# Create mean of differences for each column (dog) of the dataframe
pred_diff_ckcs <- apply(pred_diff, 2, mean)
# View histogram of mean differences
hist(pred_diff_ckcs)
# Examine mean of difference
mean(pred_diff_ckcs)
## [1] -0.001907342
# Create HPDI
HPDI(pred_diff_ckcs, 0.97)
## |0.97 0.97|
## -0.02148821 0.01873063
# create new dataframe which contains results of all dogs
new_data2 <- df
# Now set every dog's breed to pug
new_data2$breed <- "pug"
# Now get predictions from the draws of the models
pred_nd1 <- posterior_predict(model, newdata = new_data1)
pred_nd2 <- posterior_predict(model, newdata = new_data2)
pred_diff <- pred_nd1 - pred_nd2
pred_diff <- data.frame(pred_diff)
# Create mean of differences for each column (dog) of the dataframe
pred_diff_pug <- apply(pred_diff, 2, mean)
# View histogram of mean differences
hist(pred_diff_pug)
# Examine mean of difference
mean(pred_diff_pug)
## [1] 0.000145669
# Create HPDI
HPDI(pred_diff_pug, 0.97)
## |0.97 0.97|
## -0.01814732 0.01544251
# create new dataframe which contains results of all dogs
new_data2 <- df
# Now set every dog's breed to other
new_data2$breed <- "other"
# Now get predictions from the draws of the models
pred_nd1 <- posterior_predict(model, newdata = new_data1)
pred_nd2 <- posterior_predict(model, newdata = new_data2)
pred_diff <- pred_nd1 - pred_nd2
pred_diff <- data.frame(pred_diff)
# Create mean of differences for each column (dog) of the dataframe
pred_diff_other <- apply(pred_diff, 2, mean)
# View histogram of mean differences
hist(pred_diff_other)
# Examine mean of difference
mean(pred_diff_other)
## [1] 0.0006893843
# Create HPDI
HPDI(pred_diff_other, 0.97)
## |0.97 0.97|
## -0.01877564 0.02681589
# create new dataframe which contains results of all dogs
new_data2 <- df
# Now set every dog's breed to ret
new_data2$breed <- "ret"
# Now get predictions from the draws of the models
pred_nd1 <- posterior_predict(model, newdata = new_data1)
pred_nd2 <- posterior_predict(model, newdata = new_data2)
pred_diff <- pred_nd1 - pred_nd2
pred_diff <- data.frame(pred_diff)
# Create mean of differences for each column (dog) of the dataframe
pred_diff_ret <- apply(pred_diff, 2, mean)
# View histogram of mean differences
hist(pred_diff_ret)
# Examine mean of difference
mean(pred_diff_ret)
## [1] 0.000156698
# Create HPDI
HPDI(pred_diff_ret, 0.97)
## |0.97 0.97|
## -0.01849546 0.01898301
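The four comparisons above repeat the same steps; a compact consolidation looping over the breeds (a sketch, using the same model and data):

# Compare each breed against the all-mix counterfactual in one loop
baseline <- df
baseline$breed <- "mix"
pred_mix <- posterior_predict(model, newdata = baseline)
for (b in c("ckcs", "pug", "ret", "other")) {
  counterfactual <- df
  counterfactual$breed <- b
  pred_b <- posterior_predict(model, newdata = counterfactual)
  diff_means <- apply(pred_mix - pred_b, 2, mean) # per-dog mean difference
  cat("\n", b, "vs mix: mean difference =", round(mean(diff_means), 4), "\n")
  print(HPDI(diff_means, 0.97))
}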
# Set individual priors
prior_int <- set_prior("normal(0, 1)", class = "Intercept")
prior_b <- set_prior("normal(0, 1)", class = "b")
prior_sd <- set_prior("normal(0, 2)", class = "sd")
prior_alpha <- set_prior("normal(4, 2)", class = "alpha")
# Combine priors into list
priors2 <- c(prior_int, prior_b, prior_sd, prior_alpha)
Increased adapt_delta above the default of 0.8 (0.9999 here), as earlier runs had divergent transitions.
set.seed(666)
model2 <- brm(bf(slgCort ~ breed + (1 | visit),
sigma ~ breed),
family = skew_normal(),
prior = priors2,
data = df,
control=list(adapt_delta=0.9999, stepsize = 0.001, max_treedepth =15),
iter = 8000, warmup = 2000,
cores = 4,
save_pars = save_pars(all =TRUE),
sample_prior = TRUE)
## Compiling Stan program...
## Start sampling
## Found more than one class "stanfit" in cache; using the first, from namespace 'rethinking'
## Also defined by 'rstan'
## Warning: There were 4 divergent transitions after warmup. See
## https://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## to find out why this is a problem and how to eliminate them.
## Warning: Examine the pairs() plot to diagnose sampling problems
summary(model2)
## Warning: There were 4 divergent transitions after warmup. Increasing
## adapt_delta above 0.9999 may help. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## Family: skew_normal
## Links: mu = identity; sigma = log; alpha = identity
## Formula: slgCort ~ breed + (1 | visit)
## sigma ~ breed
## Data: df (Number of observations: 73)
## Draws: 4 chains, each with iter = 8000; warmup = 2000; thin = 1;
## total post-warmup draws = 24000
##
## Multilevel Hyperparameters:
## ~visit (Number of levels: 2)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.60 0.67 0.01 2.48 1.00 6911 9757
##
## Regression Coefficients:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -0.07 0.47 -1.06 0.91 1.00 7847 8740
## sigma_Intercept 0.05 0.19 -0.30 0.45 1.00 10166 12567
## breedckcs 0.05 0.41 -0.73 0.92 1.00 11860 12210
## breedpug 0.26 0.48 -0.65 1.25 1.00 13689 14662
## breedret -0.03 0.32 -0.67 0.59 1.00 12001 14679
## breedother 0.15 0.32 -0.49 0.78 1.00 12235 13974
## sigma_breedckcs -0.11 0.36 -0.78 0.65 1.00 11669 12915
## sigma_breedpug 0.16 0.35 -0.48 0.89 1.00 12329 14295
## sigma_breedret -0.11 0.27 -0.64 0.42 1.00 11478 14820
## sigma_breedother 0.04 0.24 -0.43 0.50 1.00 11624 14473
##
## Further Distributional Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## alpha 4.19 1.41 1.86 7.38 1.00 15381 13286
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
loo_model2 <- loo(model2, moment_match = TRUE)
loo_model2
##
## Computed from 24000 by 73 log-likelihood matrix.
##
## Estimate SE
## elpd_loo -109.7 7.2
## p_loo 11.3 2.6
## looic 219.4 14.3
## ------
## MCSE of elpd_loo is 0.1.
## MCSE and ESS estimates assume MCMC draws (r_eff in [0.4, 1.2]).
##
## All Pareto k estimates are good (k < 0.7).
## See help('pareto-k-diagnostic') for details.
model <- add_criterion(model, "loo")
model2 <- add_criterion(model2, "loo")
## Warning: Found 4 observations with a pareto_k > 0.7 in model 'model2'. We
## recommend to set 'moment_match = TRUE' in order to perform moment matching for
## problematic observations.
loo_compare(model, model2)
## elpd_diff se_diff
## model 0.0 0.0
## model2 -4.4 1.9
Model 1 has the higher elpd_loo (difference 4.4, SE 1.9), so it is the better fit; keep model 1.
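Stacking weights offer a complementary view of the same comparison (a sketch; a weight near 1 favours that model for prediction):

# Stacking weights across the two candidate models
loo_model_weights(model, model2)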